path: root/src/core/hle/kernel/k_code_memory.cpp
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <cstring>

#include "common/alignment.h"
#include "common/common_types.h"
#include "core/device_memory.h"
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"

namespace Kernel {
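
// KCodeMemory is the kernel object behind the CreateCodeMemory and
// ControlCodeMemory supervisor calls. It locks a range of its owner's memory
// into a page group so that the pages can be mapped into the calling process as
// a read/write alias (Map/Unmap, state CodeOut) and into the owner as executable
// generated code (MapToOwner/UnmapFromOwner, state GeneratedCode).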

KCodeMemory::KCodeMemory(KernelCore& kernel)
    : KAutoObjectWithSlabHeapAndContainer{kernel}, m_lock(kernel) {}

Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
    // Set members.
    m_owner = GetCurrentProcessPointer(m_kernel);

    // Get the owner page table.
    auto& page_table = m_owner->PageTable();

    // Construct the page group.
    m_page_group.emplace(m_kernel, page_table.GetBlockInfoManager());

    // Lock the memory.
    R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size));

    // Clear the memory, filling every locked page with 0xFF.
    for (const auto& block : *m_page_group) {
        std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
    }

    // Set remaining tracking members.
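    // Open a reference to the owner process so that it remains alive for as long
    // as this code memory exists; the matching Close() is in Finalize().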
    m_owner->Open();
    m_address = addr;
    m_is_initialized = true;
    m_is_owner_mapped = false;
    m_is_mapped = false;

    // We succeeded.
    R_SUCCEED();
}

void KCodeMemory::Finalize() {
    // Unlock the memory, but only if it is not currently mapped anywhere.
    if (!m_is_mapped && !m_is_owner_mapped) {
        const size_t size = m_page_group->GetNumPages() * PageSize;
        m_owner->PageTable().UnlockForCodeMemory(m_address, size, *m_page_group);
    }

    // Close the page group.
    m_page_group->Close();
    m_page_group->Finalize();

    // Close our reference to our owner.
    m_owner->Close();
}

Result KCodeMemory::Map(VAddr address, size_t size) {
    // Validate the size.
    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

    // Lock ourselves.
    KScopedLightLock lk(m_lock);

    // Ensure we're not already mapped.
    R_UNLESS(!m_is_mapped, ResultInvalidState);

    // Map the memory.
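    // The calling process receives a read/write view of the pages with state CodeOut;
    // the executable mapping is created separately via MapToOwner.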
    R_TRY(GetCurrentProcess(m_kernel).PageTable().MapPageGroup(
        address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));

    // Mark ourselves as mapped.
    m_is_mapped = true;

    R_SUCCEED();
}

Result KCodeMemory::Unmap(VAddr address, size_t size) {
    // Validate the size.
    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

    // Lock ourselves.
    KScopedLightLock lk(m_lock);

    // Unmap the memory.
    R_TRY(GetCurrentProcess(m_kernel).PageTable().UnmapPageGroup(address, *m_page_group,
                                                                 KMemoryState::CodeOut));

    // Mark ourselves as unmapped.
    m_is_mapped = false;

    R_SUCCEED();
}

Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
    // Validate the size.
    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

    // Lock ourselves.
    KScopedLightLock lk(m_lock);

    // Ensure we're not already mapped.
    R_UNLESS(!m_is_owner_mapped, ResultInvalidState);

    // Convert the memory permission.
    KMemoryPermission k_perm{};
    switch (perm) {
    case Svc::MemoryPermission::Read:
        k_perm = KMemoryPermission::UserRead;
        break;
    case Svc::MemoryPermission::ReadExecute:
        k_perm = KMemoryPermission::UserReadExecute;
        break;
    default:
        // Already validated by ControlCodeMemory svc
        UNREACHABLE();
    }

    // Map the memory.
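    // The owner process receives the same pages with state GeneratedCode, mapped
    // read-only or read-execute according to the converted permission.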
    R_TRY(m_owner->PageTable().MapPageGroup(address, *m_page_group, KMemoryState::GeneratedCode,
                                            k_perm));

    // Mark ourselves as mapped.
    m_is_owner_mapped = true;

    R_SUCCEED();
}

Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
    // Validate the size.
    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

    // Lock ourselves.
    KScopedLightLock lk(m_lock);

    // Unmap the memory.
    R_TRY(m_owner->PageTable().UnmapPageGroup(address, *m_page_group, KMemoryState::GeneratedCode));

    // Mark ourselves as unmapped.
    m_is_owner_mapped = false;

    R_SUCCEED();
}

} // namespace Kernel